[IA64] VTI: Use 16K page size to emulate guest physical mode
author: awilliam@xenbuild.aw <awilliam@xenbuild.aw>
Fri, 3 Mar 2006 20:03:39 +0000 (13:03 -0700)
committer: awilliam@xenbuild.aw <awilliam@xenbuild.aw>
Fri, 3 Mar 2006 20:03:39 +0000 (13:03 -0700)
Previously the VMM used a 4K page size to emulate guest physical mode on a
VTI domain, in order to satisfy the speculation-attribute requirement for
physical mode; see section 4.4.6, "Speculation Attributes", of the Itanium
Architecture Software Developer's Manual, Volume 2.

It appears the guest does not actually need to conform to this requirement,
so the larger 16K page size (PAGE_SHIFT) can be used instead.

Signed-off-by: Anthony Xu <anthony.xu@intel.com>
xen/arch/ia64/vmx/vmx_phy_mode.c
xen/arch/ia64/vmx/vmx_process.c
xen/include/asm-ia64/vmx_phy_mode.h

index 0a3f8e94a907d2b3620d1f82ac0817fa72cde017..d708a54a7cf156f1d73cb59e035adb921a94b155 100644 (file)
@@ -104,57 +104,51 @@ physical_mode_init(VCPU *vcpu)
     vcpu->arch.mode_flags = GUEST_IN_PHY;
 }
 
-extern u64 get_mfn(struct domain *d, u64 gpfn);
 extern void vmx_switch_rr7(unsigned long ,shared_info_t*,void *,void *,void *);
-void
-physical_itlb_miss_dom0(VCPU *vcpu, u64 vadr)
+/*void
+physical_itlb_miss(VCPU *vcpu, u64 vadr)
 {
     u64 psr;
     IA64_PSR vpsr;
-    u64 mppn,gppn;
+    u64 xen_mppn,xen_gppn;
     vpsr.val=vmx_vcpu_get_psr(vcpu);
-    gppn=(vadr<<1)>>13;
-    mppn = get_mfn(vcpu->domain,gppn);
-    mppn=(mppn<<12)|(vpsr.cpl<<7); 
-//    if(vadr>>63)
-//       mppn |= PHY_PAGE_UC;
-//    else
-    mppn |= PHY_PAGE_WB;
+    xen_gppn=(vadr<<1)>>(PAGE_SHIFT+1);
+    xen_mppn = gmfn_to_mfn(vcpu->domain, xen_gppn);
+    xen_mppn=(xen_mppn<<PAGE_SHIFT)|(vpsr.cpl<<7);
+    if(vadr>>63)
+        xen_mppn |= PHY_PAGE_UC;
+    else
+        xen_mppn |= PHY_PAGE_WB;
 
     psr=ia64_clear_ic();
-    ia64_itc(1,vadr&(~0xfff),mppn,EMUL_PHY_PAGE_SHIFT);
+    ia64_itc(1,vadr&PAGE_MASK,xen_mppn,PAGE_SHIFT);
     ia64_set_psr(psr);
     ia64_srlz_i();
     return;
 }
 
-
-void
-physical_itlb_miss(VCPU *vcpu, u64 vadr)
-{
-        physical_itlb_miss_dom0(vcpu, vadr);
-}
-
-
+*/
+/* 
+ *      vec=1, itlb miss
+ *      vec=2, dtlb miss
+ */
 void
-physical_dtlb_miss(VCPU *vcpu, u64 vadr)
+physical_tlb_miss(VCPU *vcpu, u64 vadr, u64 vec)
 {
     u64 psr;
     IA64_PSR vpsr;
-    u64 mppn,gppn;
-//    if(vcpu->domain!=dom0)
-//        panic("dom n physical dtlb miss happen\n");
+    u64 xen_mppn,xen_gppn;
     vpsr.val=vmx_vcpu_get_psr(vcpu);
-    gppn=(vadr<<1)>>13;
-    mppn = get_mfn(vcpu->domain, gppn);
-    mppn=(mppn<<12)|(vpsr.cpl<<7);
+    xen_gppn=(vadr<<1)>>(PAGE_SHIFT+1);
+    xen_mppn = gmfn_to_mfn(vcpu->domain, xen_gppn);
+    xen_mppn=(xen_mppn<<PAGE_SHIFT)|(vpsr.cpl<<7);
     if(vadr>>63)
-        mppn |= PHY_PAGE_UC;
+        xen_mppn |= PHY_PAGE_UC;
     else
-        mppn |= PHY_PAGE_WB;
+        xen_mppn |= PHY_PAGE_WB;
 
     psr=ia64_clear_ic();
-    ia64_itc(2,vadr&(~0xfff),mppn,EMUL_PHY_PAGE_SHIFT);
+    ia64_itc(vec,vadr&PAGE_MASK,xen_mppn,PAGE_SHIFT);
     ia64_set_psr(psr);
     ia64_srlz_i();
     return;
@@ -193,13 +187,13 @@ vmx_load_all_rr(VCPU *vcpu)
        if (is_physical_mode(vcpu)) {
                if (vcpu->arch.mode_flags & GUEST_PHY_EMUL)
                        panic("Unexpected domain switch in phy emul\n");
-               phy_rr.rrval = vcpu->domain->arch.metaphysical_rr0;
      phy_rr.ps = EMUL_PHY_PAGE_SHIFT;
+               phy_rr.rrval = vcpu->arch.metaphysical_rr0;
//    phy_rr.ps = PAGE_SHIFT;
        phy_rr.ve = 1;
 
                ia64_set_rr((VRN0 << VRN_SHIFT), phy_rr.rrval);
-               phy_rr.rrval = vcpu->domain->arch.metaphysical_rr4;
-       phy_rr.ps = EMUL_PHY_PAGE_SHIFT;
+               phy_rr.rrval = vcpu->arch.metaphysical_rr4;
+//     phy_rr.ps = PAGE_SHIFT;
            phy_rr.ve = 1;
 
                ia64_set_rr((VRN4 << VRN_SHIFT), phy_rr.rrval);
@@ -242,12 +236,12 @@ switch_to_physical_rid(VCPU *vcpu)
     /* Save original virtual mode rr[0] and rr[4] */
     psr=ia64_clear_ic();
     phy_rr.rrval = vcpu->domain->arch.metaphysical_rr0;
-    phy_rr.ps = EMUL_PHY_PAGE_SHIFT;
+//    phy_rr.ps = EMUL_PHY_PAGE_SHIFT;
     phy_rr.ve = 1;
     ia64_set_rr(VRN0<<VRN_SHIFT, phy_rr.rrval);
     ia64_srlz_d();
     phy_rr.rrval = vcpu->domain->arch.metaphysical_rr4;
-    phy_rr.ps = EMUL_PHY_PAGE_SHIFT;
+//    phy_rr.ps = EMUL_PHY_PAGE_SHIFT;
     phy_rr.ve = 1;
     ia64_set_rr(VRN4<<VRN_SHIFT, phy_rr.rrval);
     ia64_srlz_d();
index 268f2d2acad9acc33726984834633e176b9c3d36..5dfead87069a38a610e39521b320186f651364e0 100644 (file)
@@ -315,23 +315,20 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* regs)
         return;
     }
 */
-    if(vadr == 0x1ea18c00 ){
+/*    if(vadr == 0x1ea18c00 ){
         ia64_clear_ic();
         while(1);
     }
+ */
     if(is_physical_mode(v)&&(!(vadr<<1>>62))){
-        if(vec==1){
-            physical_itlb_miss(v, vadr);
-            return IA64_FAULT;
-        }
         if(vec==2){
             if(v->domain!=dom0&&__gpfn_is_io(v->domain,(vadr<<1)>>(PAGE_SHIFT+1))){
                 emulate_io_inst(v,((vadr<<1)>>1),4);   //  UC
-            }else{
-                physical_dtlb_miss(v, vadr);
+                return IA64_FAULT;
             }
-            return IA64_FAULT;
         }
+        physical_tlb_miss(v, vadr, vec);
+        return IA64_FAULT;
     }
     vrr = vmx_vcpu_rr(v, vadr);
     if(vec == 1) type = ISIDE_TLB;
index fab8a82bb25ddd27bc63c056ad6edc22155fb625..8cc35352c07360d13419bbc5627909b9d6e908bb 100644 (file)
 #define PHY_PAGE_UC (_PAGE_A|_PAGE_D|_PAGE_P|_PAGE_MA_UC|_PAGE_AR_RWX)
 #define PHY_PAGE_WB (_PAGE_A|_PAGE_D|_PAGE_P|_PAGE_MA_WB|_PAGE_AR_RWX)
 
-#ifdef PHY_16M  /* 16M: large granule for test*/
-#define EMUL_PHY_PAGE_SHIFT 24
-#else   /* 4K: emulated physical page granule */
-#define EMUL_PHY_PAGE_SHIFT 12
-#endif
+//#ifdef PHY_16M  /* 16M: large granule for test*/
+//#define EMUL_PHY_PAGE_SHIFT 24
+//#else   /* 4K: emulated physical page granule */
+//#define EMUL_PHY_PAGE_SHIFT 12
+//#endif
 #define IA64_RSC_MODE       0x0000000000000003
 #define XEN_RR7_RID    (0xf00010)
 #define GUEST_IN_PHY    0x1
@@ -96,8 +96,7 @@ extern void prepare_if_physical_mode(VCPU *vcpu);
 extern void recover_if_physical_mode(VCPU *vcpu);
 extern void vmx_init_all_rr(VCPU *vcpu);
 extern void vmx_load_all_rr(VCPU *vcpu);
-extern void physical_itlb_miss(VCPU *vcpu, u64 vadr);
-extern void physical_dtlb_miss(VCPU *vcpu, u64 vadr);
+extern void physical_tlb_miss(VCPU *vcpu, u64 vadr, u64 vec);
 /*
  * No sanity check here, since all psr changes have been
  * checked in switch_mm_mode().